Define Linux-style <preempt.h> interface.
author: Keir Fraser <keir@xen.org>
Thu, 18 Nov 2010 11:45:33 +0000 (11:45 +0000)
committer: Keir Fraser <keir@xen.org>
Thu, 18 Nov 2010 11:45:33 +0000 (11:45 +0000)
Use it to disable sleeping in spinlock and rcu-read regions.

Signed-off-by: Keir Fraser <keir@xen.org>
xen/common/Makefile
xen/common/preempt.c [new file with mode: 0644]
xen/common/schedule.c
xen/common/softirq.c
xen/common/spinlock.c
xen/include/xen/lib.h
xen/include/xen/preempt.h [new file with mode: 0644]
xen/include/xen/rcupdate.h
xen/include/xen/spinlock.h

index 1abca7d64a9f290b8e7b8b111525d1fa889d35df..d244e39c319e0a8e795ebb37a1e0df2fc1158ec9 100644 (file)
@@ -13,6 +13,7 @@ obj-y += memory.o
 obj-y += multicall.o
 obj-y += notifier.o
 obj-y += page_alloc.o
+obj-y += preempt.o
 obj-y += rangeset.o
 obj-y += sched_credit.o
 obj-y += sched_credit2.o
diff --git a/xen/common/preempt.c b/xen/common/preempt.c
new file mode 100644 (file)
index 0000000..e2133ea
--- /dev/null
@@ -0,0 +1,25 @@
+/******************************************************************************
+ * preempt.c
+ * 
+ * Track atomic regions in the hypervisor which disallow sleeping.
+ * 
+ * Copyright (c) 2010, Keir Fraser <keir@xen.org>
+ * 
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * 
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <xen/preempt.h>
+
+DEFINE_PER_CPU(unsigned int, __preempt_count);
index 5e86082b15e8a81f207d036fc2a55aa5f8f65dea..2415ce1554aca101efffdce57698c2305bfcb6a3 100644 (file)
@@ -32,6 +32,7 @@
 #include <xen/guest_access.h>
 #include <xen/multicall.h>
 #include <xen/cpu.h>
+#include <xen/preempt.h>
 #include <public/sched.h>
 #include <xsm/xsm.h>
 
index fb5fcd147d4f734f378e99fdce19b41964c8589f..30559ce7b2082348a3270b4fe43b947abd007570 100644 (file)
@@ -12,6 +12,7 @@
 #include <xen/config.h>
 #include <xen/init.h>
 #include <xen/mm.h>
+#include <xen/preempt.h>
 #include <xen/sched.h>
 #include <xen/rcupdate.h>
 #include <xen/softirq.h>
index fff796171d8b6f15297affacfaaf9762e888a2d4..2abf89e9b11dbe2aa3130e14a8bf217856870595 100644 (file)
@@ -5,6 +5,7 @@
 #include <xen/time.h>
 #include <xen/spinlock.h>
 #include <xen/guest_access.h>
+#include <xen/preempt.h>
 #include <public/sysctl.h>
 #include <asm/processor.h>
 
@@ -39,22 +40,9 @@ void spin_debug_disable(void)
     atomic_dec(&spin_debug);
 }
 
-static DEFINE_PER_CPU(atomic_t, lockdepth);
-
-#define lockdepth_inc() atomic_inc(&this_cpu(lockdepth))
-#define lockdepth_dec() atomic_dec(&this_cpu(lockdepth))
-
-unsigned int locking_depth(void)
-{
-    return atomic_read(&this_cpu(lockdepth));
-}
-
 #else /* defined(NDEBUG) */
 
 #define check_lock(l) ((void)0)
-#define lockdepth_inc() ((void)0)
-#define lockdepth_dec() ((void)0)
-unsigned int locking_depth(void) { return 0; }
 
 #endif
 
@@ -94,7 +82,7 @@ void _spin_lock(spinlock_t *lock)
             cpu_relax();
     }
     LOCK_PROFILE_GOT;
-    lockdepth_inc();
+    preempt_disable();
 }
 
 void _spin_lock_irq(spinlock_t *lock)
@@ -113,7 +101,7 @@ void _spin_lock_irq(spinlock_t *lock)
         local_irq_disable();
     }
     LOCK_PROFILE_GOT;
-    lockdepth_inc();
+    preempt_disable();
 }
 
 unsigned long _spin_lock_irqsave(spinlock_t *lock)
@@ -132,20 +120,20 @@ unsigned long _spin_lock_irqsave(spinlock_t *lock)
         local_irq_save(flags);
     }
     LOCK_PROFILE_GOT;
-    lockdepth_inc();
+    preempt_disable();
     return flags;
 }
 
 void _spin_unlock(spinlock_t *lock)
 {
-    lockdepth_dec();
+    preempt_enable();
     LOCK_PROFILE_REL;
     _raw_spin_unlock(&lock->raw);
 }
 
 void _spin_unlock_irq(spinlock_t *lock)
 {
-    lockdepth_dec();
+    preempt_enable();
     LOCK_PROFILE_REL;
     _raw_spin_unlock(&lock->raw);
     local_irq_enable();
@@ -153,7 +141,7 @@ void _spin_unlock_irq(spinlock_t *lock)
 
 void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 {
-    lockdepth_dec();
+    preempt_enable();
     LOCK_PROFILE_REL;
     _raw_spin_unlock(&lock->raw);
     local_irq_restore(flags);
@@ -173,7 +161,7 @@ int _spin_trylock(spinlock_t *lock)
 #ifdef LOCK_PROFILE
     lock->profile.time_locked = NOW();
 #endif
-    lockdepth_inc();
+    preempt_disable();
     return 1;
 }
 
@@ -247,7 +235,7 @@ void _read_lock(rwlock_t *lock)
 {
     check_lock(&lock->debug);
     _raw_read_lock(&lock->raw);
-    lockdepth_inc();
+    preempt_disable();
 }
 
 void _read_lock_irq(rwlock_t *lock)
@@ -256,7 +244,7 @@ void _read_lock_irq(rwlock_t *lock)
     local_irq_disable();
     check_lock(&lock->debug);
     _raw_read_lock(&lock->raw);
-    lockdepth_inc();
+    preempt_disable();
 }
 
 unsigned long _read_lock_irqsave(rwlock_t *lock)
@@ -265,26 +253,26 @@ unsigned long _read_lock_irqsave(rwlock_t *lock)
     local_irq_save(flags);
     check_lock(&lock->debug);
     _raw_read_lock(&lock->raw);
-    lockdepth_inc();
+    preempt_disable();
     return flags;
 }
 
 void _read_unlock(rwlock_t *lock)
 {
-    lockdepth_dec();
+    preempt_enable();
     _raw_read_unlock(&lock->raw);
 }
 
 void _read_unlock_irq(rwlock_t *lock)
 {
-    lockdepth_dec();
+    preempt_enable();
     _raw_read_unlock(&lock->raw);
     local_irq_enable();
 }
 
 void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
-    lockdepth_dec();
+    preempt_enable();
     _raw_read_unlock(&lock->raw);
     local_irq_restore(flags);
 }
@@ -293,7 +281,7 @@ void _write_lock(rwlock_t *lock)
 {
     check_lock(&lock->debug);
     _raw_write_lock(&lock->raw);
-    lockdepth_inc();
+    preempt_disable();
 }
 
 void _write_lock_irq(rwlock_t *lock)
@@ -302,7 +290,7 @@ void _write_lock_irq(rwlock_t *lock)
     local_irq_disable();
     check_lock(&lock->debug);
     _raw_write_lock(&lock->raw);
-    lockdepth_inc();
+    preempt_disable();
 }
 
 unsigned long _write_lock_irqsave(rwlock_t *lock)
@@ -311,7 +299,7 @@ unsigned long _write_lock_irqsave(rwlock_t *lock)
     local_irq_save(flags);
     check_lock(&lock->debug);
     _raw_write_lock(&lock->raw);
-    lockdepth_inc();
+    preempt_disable();
     return flags;
 }
 
@@ -320,26 +308,26 @@ int _write_trylock(rwlock_t *lock)
     check_lock(&lock->debug);
     if ( !_raw_write_trylock(&lock->raw) )
         return 0;
-    lockdepth_inc();
+    preempt_disable();
     return 1;
 }
 
 void _write_unlock(rwlock_t *lock)
 {
-    lockdepth_dec();
+    preempt_enable();
     _raw_write_unlock(&lock->raw);
 }
 
 void _write_unlock_irq(rwlock_t *lock)
 {
-    lockdepth_dec();
+    preempt_enable();
     _raw_write_unlock(&lock->raw);
     local_irq_enable();
 }
 
 void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
-    lockdepth_dec();
+    preempt_enable();
     _raw_write_unlock(&lock->raw);
     local_irq_restore(flags);
 }
index a5a27d58abb917e624d5e7690294ad5811e4f40b..e09929a604d798cf1bc8bd1213db71980963cf9e 100644 (file)
@@ -119,6 +119,4 @@ extern void add_taint(unsigned);
 struct cpu_user_regs;
 void dump_execstate(struct cpu_user_regs *);
 
-#define in_atomic() (locking_depth() || in_irq() || !local_irq_is_enabled())
-
 #endif /* __LIB_H__ */
diff --git a/xen/include/xen/preempt.h b/xen/include/xen/preempt.h
new file mode 100644 (file)
index 0000000..749bc58
--- /dev/null
@@ -0,0 +1,33 @@
+/******************************************************************************
+ * preempt.h
+ * 
+ * Track atomic regions in the hypervisor which disallow sleeping.
+ * 
+ * Copyright (c) 2010, Keir Fraser <keir@xen.org>
+ */
+
+#ifndef __XEN_PREEMPT_H__
+#define __XEN_PREEMPT_H__
+
+#include <xen/config.h>
+#include <xen/percpu.h>
+#include <xen/irq.h>    /* in_irq() */
+#include <asm/system.h> /* local_irq_is_enabled() */
+
+DECLARE_PER_CPU(unsigned int, __preempt_count);
+
+#define preempt_count() (this_cpu(__preempt_count))
+
+#define preempt_disable() do {                  \
+    preempt_count()++;                          \
+    barrier();                                  \
+} while (0)
+
+#define preempt_enable() do {                   \
+    barrier();                                  \
+    preempt_count()--;                          \
+} while (0)
+
+#define in_atomic() (preempt_count() || in_irq() || !local_irq_is_enabled())
+
+#endif /* __XEN_PREEMPT_H__ */
index 68f98acfda73947adb7059b11ed1cd8ca2939af9..bb4af9d6e0608d967799426cf679e6ff1b208a34 100644 (file)
@@ -36,6 +36,7 @@
 #include <xen/spinlock.h>
 #include <xen/percpu.h>
 #include <xen/cpumask.h>
+#include <xen/preempt.h>
 
 /**
  * struct rcu_head - callback structure for use with RCU
@@ -145,14 +146,14 @@ typedef struct _rcu_read_lock rcu_read_lock_t;
  *
  * It is illegal to block while in an RCU read-side critical section.
  */
-#define rcu_read_lock(x)       ((void)(x))
+#define rcu_read_lock(x)       ({ ((void)(x)); preempt_disable(); })
 
 /**
  * rcu_read_unlock - marks the end of an RCU read-side critical section.
  *
  * See rcu_read_lock() for more information.
  */
-#define rcu_read_unlock(x)     ((void)(x))
+#define rcu_read_unlock(x)     ({ ((void)(x)); preempt_enable(); })
 
 /*
  * So where is rcu_write_lock()?  It does not exist, as there is no
index 6bb0cd6d1c3568d419fe38962ac133538990d384..a2d1ed99bf0625d87d9e3edbad0401537994d05a 100644 (file)
@@ -223,6 +223,4 @@ int _rw_is_write_locked(rwlock_t *lock);
 #define rw_is_locked(l)               _rw_is_locked(l)
 #define rw_is_write_locked(l)         _rw_is_write_locked(l)
 
-unsigned int locking_depth(void);
-
 #endif /* __SPINLOCK_H__ */